# Computations
import pandas as pd
import numpy as np
import calendar
# sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
# Visualisation libraries
## Progress Bar
import progressbar
## Text
from colorama import Fore, Back, Style
from IPython.display import Image, display, Markdown, Latex, clear_output
## plotly
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import plotly.offline as py
from plotly.subplots import make_subplots
import plotly.express as px
## seaborn
import seaborn as sns
# Global seaborn theme: white grid background, "paper" context with enlarged fonts
sns.set_style("whitegrid")
sns.set_context("paper", rc={"font.size":12,"axes.titlesize":14,"axes.labelsize":12})
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.patches import Ellipse, Polygon
import matplotlib.gridspec as gridspec
import matplotlib.colors
from pylab import rcParams
# NOTE(review): the 'seaborn-whitegrid' style was renamed 'seaborn-v0_8-whitegrid'
# in matplotlib >= 3.6 — confirm the pinned matplotlib version still accepts this name.
plt.style.use('seaborn-whitegrid')
import matplotlib as mpl
# Default figure geometry and font sizes for every matplotlib plot below
mpl.rcParams['figure.figsize'] = (17, 6)
mpl.rcParams['axes.labelsize'] = 14
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
mpl.rcParams['text.color'] = 'k'
# IPython magic: render figures inline in the notebook
%matplotlib inline
import warnings
# Silence library warnings (deprecations, sklearn data-conversion warnings) in the output
warnings.filterwarnings("ignore")
In this article, we work on a dataset available from the UCI Machine Learning Repository. The data is related to direct marketing campaigns (phone calls) of a Portuguese banking institution. The classification goal is to predict if the client will subscribe to a term deposit (variable y).
This dataset is based on "Bank Marketing" UCI dataset (please check the description at archive.ics.uci.edu/ml/datasets/Bank+Marketing). The data is enriched by the addition of five new social and economic features/attributes (national wide indicators from a ~10M population country), published by the Banco de Portugal and publicly available at: bportugal.pt/estatisticasweb. This dataset is almost identical to the one used in [Moro et al., 2014] (it does not include all attributes due to privacy concerns).
The data is related to the direct marketing campaigns of a Portuguese banking institution. The marketing campaigns were based on phone calls. Often, more than one contact to the same client was required, in order to assess if the product (bank term deposit) would be ('yes') or not ('no') subscribed.
There are four datasets:
The classification goal is to predict if the client will subscribe (yes/no) a term deposit (variable y).
The zip file includes two datasets:
The binary classification goal is to predict if the client will subscribe a bank term deposit (variable y).
# Load the "bank-additional-full" UCI Bank Marketing dataset (semicolon-separated CSV)
Path = 'Data/bank-additional-full.csv'
Data = pd.read_csv(Path, sep=';')
# Human-readable name for the target column and its two class labels
Target = 'Term Deposit Subscription'
Labels = ['No', 'Yes']
# NOTE(review): Styler.hide_index()/set_precision() are deprecated in pandas >= 1.4
# (replacements: .hide(axis='index') / .format(precision=...)) — confirm pinned pandas version.
display(Data.head(10).style.hide_index().set_precision(2))
| age | job | marital | education | default | housing | loan | contact | month | day_of_week | duration | campaign | pdays | previous | poutcome | emp.var.rate | cons.price.idx | cons.conf.idx | euribor3m | nr.employed | y |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 56 | housemaid | married | basic.4y | no | no | no | telephone | may | mon | 261 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 57 | services | married | high.school | unknown | no | no | telephone | may | mon | 149 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 37 | services | married | high.school | no | yes | no | telephone | may | mon | 226 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 40 | admin. | married | basic.6y | no | no | no | telephone | may | mon | 151 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 56 | services | married | high.school | no | no | yes | telephone | may | mon | 307 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 45 | services | married | basic.9y | unknown | no | no | telephone | may | mon | 198 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 59 | admin. | married | professional.course | no | no | no | telephone | may | mon | 139 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 41 | blue-collar | married | unknown | unknown | no | no | telephone | may | mon | 217 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 24 | technician | single | professional.course | no | yes | no | telephone | may | mon | 380 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| 25 | services | single | high.school | no | yes | no | telephone | may | mon | 50 | 1 | 999 | 0 | nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | no |
| Number of Instances | Number of Attributes |
|---|---|
| 41188 | 21 |
| Feature | Description |
|---|---|
| Age | numeric |
| Job | Type of Job (categorical: "admin.","blue-collar","entrepreneur","housemaid","management","retired","self-employed","services","student","technician","unemployed","unknown") |
| Marital | marital status (categorical: "divorced","married","single","unknown"; note: "divorced" means divorced or widowed) |
| Education | (categorical: "basic.4y","basic.6y","basic.9y","high.school","illiterate","professional.course","university.degree","unknown") |
| Default | has credit in default? (categorical: "no","yes","unknown") |
| Housing | has housing loan? (categorical: "no","yes","unknown") |
| Loan | has personal loan? (categorical: "no","yes","unknown") |
# Ordinal encoding of education levels: 0 = illiterate .. 6 = university degree, -1 = unknown
Education_Dict = {'basic.4y':1,'basic.6y':2,'basic.9y':3,'high.school':4,
'illiterate':0,'professional.course':5,'university.degree':6,'unknown':-1}
# Group the dataset's columns into the subcategories of the UCI documentation;
# the first 7 columns describe the bank client (title-cased for display).
Dataset_Subcategories = {}
Dataset_Subcategories['Bank Client Data'] = [x.title() for x in Data.iloc[:,:7].columns]
| Feature | Description |
|---|---|
| Contact | contact communication type (categorical: "cellular","telephone") |
| Month | last contact month of year (categorical: "jan", "feb", "mar", ..., "nov", "dec") |
| Day of week | last contact day of the week (categorical: "mon","tue","wed","thu","fri") |
| Duration | last contact duration, in seconds (numeric). Important note: this attribute highly affects the output target (e.g., if duration=0 then y="no"). Yet, the duration is not known before a call is performed. Also, after the end of the call y is obviously known. Thus, this input should only be included for benchmark purposes and should be discarded if the intention is to have a realistic predictive model. |
# Columns 7-10 describe the last contact of the current campaign
Dataset_Subcategories['Related with the Last Contact of the Current Campaign'] = \
[x.title().replace('_',' ') for x in Data.iloc[:,7:11].columns]
| Feature | Description |
|---|---|
| Campaign | number of contacts performed during this campaign and for this client (numeric, includes last contact) |
| Pdays | number of days that passed by after the client was last contacted from a previous campaign (numeric; 999 means client was not previously contacted) |
| Previous | number of contacts performed before this campaign and for this client (numeric) |
| Poutcome | outcome of the previous marketing campaign (categorical: "failure","nonexistent","success") |
# Columns 11-14: campaign / previous-contact attributes
Dataset_Subcategories['Other Attributes'] = [x.title() for x in Data.iloc[:,11:15].columns]
| Feature | Description |
|---|---|
| emp.var.rate | employment variation rate - quarterly indicator (numeric) |
| cons.price.idx | consumer price index - monthly indicator (numeric) |
| cons.conf.idx | consumer confidence index - monthly indicator (numeric) |
| euribor3m | euribor* 3 month rate - daily indicator (numeric) |
| nr.employed | number of employees - quarterly indicator (numeric) |
* the basic rate of interest used in lending between banks on the European Union interbank market and also used as a reference for setting the interest rate on other loans.
# Columns 15-19: social and economic context indicators, renamed to full descriptive titles
Dataset_Subcategories['Social and Economic Context Attributes'] = ['Employment Variation Rate','Consumer Price Index',
'Consumer Confidence Index','Euribor three Month Rate', 'Number of Employees']
| Feature | Description |
|---|---|
| y | has the client subscribed a term deposit? (binary: "yes","no") |
# The target column ('y' in the raw file) also gets a descriptive name
Dataset_Subcategories['Output variable (Desired Target)'] = ['Term Deposit Subscription']
# Rename all DataFrame columns to the readable titles, in subcategory order
# (relies on dict preserving insertion order, which matches the original column order)
Columns = []
for x in Dataset_Subcategories.keys():
    Columns.extend(Dataset_Subcategories[x])
Data.columns = Columns
Data.head()
| Age | Job | Marital | Education | Default | Housing | Loan | Contact | Month | Day Of Week | ... | Campaign | Pdays | Previous | Poutcome | Employment Variation Rate | Consumer Price Index | Consumer Confidence Index | Euribor three Month Rate | Number of Employees | Term Deposit Subscription | |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 56 | housemaid | married | basic.4y | no | no | no | telephone | may | mon | ... | 1 | 999 | 0 | nonexistent | 1.1 | 93.994 | -36.4 | 4.857 | 5191.0 | no |
| 1 | 57 | services | married | high.school | unknown | no | no | telephone | may | mon | ... | 1 | 999 | 0 | nonexistent | 1.1 | 93.994 | -36.4 | 4.857 | 5191.0 | no |
| 2 | 37 | services | married | high.school | no | yes | no | telephone | may | mon | ... | 1 | 999 | 0 | nonexistent | 1.1 | 93.994 | -36.4 | 4.857 | 5191.0 | no |
| 3 | 40 | admin. | married | basic.6y | no | no | no | telephone | may | mon | ... | 1 | 999 | 0 | nonexistent | 1.1 | 93.994 | -36.4 | 4.857 | 5191.0 | no |
| 4 | 56 | services | married | high.school | no | no | yes | telephone | may | mon | ... | 1 | 999 | 0 | nonexistent | 1.1 | 93.994 | -36.4 | 4.857 | 5191.0 | no |
5 rows × 21 columns
def Data_Plot(Inp):
    """Summarise each column's dtype and completeness, and draw a bar chart.

    For every column of *Inp* the summary records the dtype (as a string),
    the number of NaN values, the total row count ('Size') and the
    percentage of non-missing entries.  A plotly bar chart of the
    percentages, coloured by dtype, is shown as a side effect.

    Returns the summary DataFrame (columns renamed so the feature names
    live in a 'Features' column).
    """
    n_rows = Inp.shape[0]
    summary = Inp.dtypes.astype(str).to_frame(name='Data Type').join(
        Inp.isnull().sum().to_frame(name='Number of NaN Values'), how='outer')
    summary['Size'] = n_rows
    # Share of non-NaN entries per column, rounded to two decimals
    summary['Percentage'] = 100 - np.round(100 * (summary['Number of NaN Values'] / n_rows), 2)
    summary = summary.reset_index(drop=False).rename(columns={'index': 'Features'})
    #
    fig = px.bar(summary, x='Features', y='Percentage', color='Data Type', text='Data Type',
                 color_discrete_sequence=['PaleGreen', 'LightCyan', 'PeachPuff', 'Pink', 'Plum'],
                 hover_data=summary.columns)
    fig.update_layout(plot_bgcolor='white',
                      legend=dict(x=1.01, y=.5, traceorder="normal",
                                  bordercolor="DarkGray", borderwidth=1))
    # Indent the in-bar dtype labels a little for readability
    fig.update_traces(texttemplate=6 * ' ' + '%{label}', textposition='inside')
    fig.update_traces(marker_line_color='Black', marker_line_width=1., opacity=1)
    fig.show()
    return summary
def dtypes_group(Inp, Dict = False):
    """Group the columns of *Inp* by their dtype.

    Returns a DataFrame with one row per dtype: 'Data Type' (dtype as a
    string), 'Features' (list of column names of that dtype) and 'Count'
    (how many columns).  When *Dict* is True, returns a
    {dtype string: [column names]} mapping instead.
    """
    by_dtype = Inp.dtypes.to_frame(name='Data Type').sort_values(by=['Data Type'])
    grouped = pd.DataFrame(index=by_dtype['Data Type'].unique(),
                           columns=['Features', 'Count'])
    for dtype_value in grouped.index:
        members = by_dtype.index[by_dtype['Data Type'] == dtype_value].tolist()
        # Wrap in a one-element list so the whole list lands in a single cell
        grouped.loc[grouped.index == dtype_value, 'Features'] = [members]
        grouped.loc[grouped.index == dtype_value, 'Count'] = len(members)
    grouped.index.name = 'Data Type'
    grouped = grouped.reset_index(drop=False)
    # dtype objects are awkward keys/labels — keep their string form
    grouped['Data Type'] = grouped['Data Type'].astype(str)
    if Dict:
        grouped = dict(zip(grouped['Data Type'], grouped['Features']))
    return grouped
# Plot the dtype / missing-value overview and collect the columns grouped by dtype
_ = Data_Plot(Data)
dType = dtypes_group(Data, Dict = True)
Changing the categorical variable columns to title format (for EDA):
# All object-dtype columns are categorical features
Categorical_Variables = dType['object']
# For EDA: title-case every categorical value (e.g. 'married' -> 'Married')
for Feat in Categorical_Variables:
    Data[Feat] = Data[Feat].apply(lambda x: x.title())
del Feat
# Education: additionally replace '.' with a space ('basic.4y' -> 'Basic 4Y')
Data['Education'] = Data['Education'].apply(lambda x: x.title().replace('.',' '))
# Re-key the ordinal-encoding dict so its keys match the reformatted labels
Education_Dict = dict(zip([x.title().replace('.',' ') for x in Education_Dict.keys()],Education_Dict.values()))
display(Data.head(10).style.hide_index().set_precision(2))
# Expand weekday abbreviations to full names (Mon -> Monday, ...)
Data['Day Of Week'] = Data['Day Of Week'].replace(dict(zip([x for x in calendar.day_abbr],[x for x in calendar.day_name] )))
# Saving the EDA-ready frame to a CSV file next to the source data
Data.to_csv (Path.split(".")[0]+'_EDA.csv', index = None, header=True)
| Age | Job | Marital | Education | Default | Housing | Loan | Contact | Month | Day Of Week | Duration | Campaign | Pdays | Previous | Poutcome | Employment Variation Rate | Consumer Price Index | Consumer Confidence Index | Euribor three Month Rate | Number of Employees | Term Deposit Subscription |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 56 | Housemaid | Married | Basic 4Y | No | No | No | Telephone | May | Mon | 261 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 57 | Services | Married | High School | Unknown | No | No | Telephone | May | Mon | 149 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 37 | Services | Married | High School | No | Yes | No | Telephone | May | Mon | 226 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 40 | Admin. | Married | Basic 6Y | No | No | No | Telephone | May | Mon | 151 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 56 | Services | Married | High School | No | No | Yes | Telephone | May | Mon | 307 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 45 | Services | Married | Basic 9Y | Unknown | No | No | Telephone | May | Mon | 198 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 59 | Admin. | Married | Professional Course | No | No | No | Telephone | May | Mon | 139 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 41 | Blue-Collar | Married | Unknown | Unknown | No | No | Telephone | May | Mon | 217 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 24 | Technician | Single | Professional Course | No | Yes | No | Telephone | May | Mon | 380 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
| 25 | Services | Single | High School | No | Yes | No | Telephone | May | Mon | 50 | 1 | 999 | 0 | Nonexistent | 1.10 | 93.99 | -36.40 | 4.86 | 5191.00 | No |
First, let's convert all Yes/No columns as follows
$$\begin{cases} -1 & \mbox{Unknown}\\0 &\mbox{No}\\ 1 &\mbox{Yes}\end{cases}$$def Header(Text, L = 100, C = 'Blue', T = 'White'):
BACK = {'Black': Back.BLACK, 'Red':Back.RED, 'Green':Back.GREEN, 'Yellow': Back.YELLOW, 'Blue': Back.BLUE,
'Magenta':Back.MAGENTA, 'Cyan': Back.CYAN}
FORE = {'Black': Fore.BLACK, 'Red':Fore.RED, 'Green':Fore.GREEN, 'Yellow':Fore.YELLOW, 'Blue':Fore.BLUE,
'Magenta':Fore.MAGENTA, 'Cyan':Fore.CYAN, 'White': Fore.WHITE}
print(BACK[C] + FORE[T] + Style.NORMAL + Text + Style.RESET_ALL + ' ' + FORE[C] +
Style.NORMAL + (L- len(Text) - 1)*'=' + Style.RESET_ALL)
def Line(L=100, C = 'Blue'):
    """Print a horizontal rule of L '=' characters in colour C (colorama name)."""
    palette = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN, 'Yellow': Fore.YELLOW,
               'Blue': Fore.BLUE, 'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    rule = L * '='
    print(palette[C] + Style.NORMAL + rule + Style.RESET_ALL)
def List_Print(Text, List, C = 'Blue', T = 'White'):
    """Print 'Text:' as a coloured label, followed by the comma-joined List.

    C is the label's background colour, T its text colour (colorama names).
    """
    backgrounds = {'Black': Back.BLACK, 'Red': Back.RED, 'Green': Back.GREEN, 'Yellow': Back.YELLOW,
                   'Blue': Back.BLUE, 'Magenta': Back.MAGENTA, 'Cyan': Back.CYAN}
    foregrounds = {'Black': Fore.BLACK, 'Red': Fore.RED, 'Green': Fore.GREEN, 'Yellow': Fore.YELLOW,
                   'Blue': Fore.BLUE, 'Magenta': Fore.MAGENTA, 'Cyan': Fore.CYAN, 'White': Fore.WHITE}
    label = backgrounds[C] + foregrounds[T] + Style.NORMAL + '%s:' % Text + Style.RESET_ALL
    print(label + ' %s' % ', '.join(List))
# Work on a copy so the EDA frame 'Data' stays untouched
df = Data.copy()
# Collect every categorical feature whose values are only Yes/No/Unknown
YN_Feat = []
for c in Categorical_Variables:
    s = set(df[c].unique().tolist())
    if s.issubset({'No', 'Yes', 'Unknown'}):
        YN_Feat.append(c)
del c, s
List_Print('Yes/No Features', YN_Feat)
# Converting: Yes -> 1, No -> 0, Unknown -> -1
Temp = {'Yes':1, 'No':0, 'Unknown':-1}
for c in YN_Feat:
    df[c] = df[c].replace(Temp).astype(int)
del c
display(df[YN_Feat].head().style.hide_index())
## Adding these keys and values to a dictionary (per-feature record of the encodings)
CatVar_dict = {}
for c in YN_Feat:
    CatVar_dict[c] = Temp
# Subtracting the Yes/No features from the remaining categorical features
Categorical_Variables = list(set(Categorical_Variables) - set(YN_Feat))
del YN_Feat, Temp, c
Yes/No Features: Housing, Loan, Default, Term Deposit Subscription
| Housing | Loan | Default | Term Deposit Subscription |
|---|---|---|---|
| 0 | 0 | 0 | 0 |
| 0 | 0 | -1 | 0 |
| 1 | 0 | 0 | 0 |
| 0 | 0 | 0 | 0 |
| 0 | 1 | 0 | 0 |
Moreover,
List_Print('Remaining categorical features', Categorical_Variables, C = 'Red')
Remaining categorical features: Marital, Education, Job, Month, Poutcome, Day Of Week, Contact
For these features, we have,
$$\mbox{Poutcome} = \begin{cases} -1 & \mbox{Nonexistent}\\0 &\mbox{Failure}\\ 1 &\mbox{Success}\end{cases}$$Temp = {'Success':1, 'Failure':0, 'Nonexistent':-1}
df['Poutcome'] = df['Poutcome'].replace(Temp).astype(int)
CatVar_dict['Poutcome'] = Temp
del Temp
Temp = {'Divorced':2, 'Married':1, 'Single':0, 'Unknown':-1}
df['Marital'] = df['Marital'].replace(Temp).astype(int)
CatVar_dict['Marital'] = Temp
del Temp
df['Day Of Week'] = df['Day Of Week'].replace(dict(zip([x for x in calendar.day_name], np.arange(7)))).astype(int)
df['Contact'] = df['Contact'].replace(dict(zip(df['Contact'].unique(), np.arange(len(df['Contact'].unique()))))).astype(int)
# Job
Temp = {'Unknown':-1, 'Unemployed':0, 'Student': 1, 'Housemaid':2,
'Retired':3, 'Blue-Collar':4, 'Self-Employed': 5, 'Services':6,
'Technician':7, 'Admin.':8, 'Management':9, 'Entrepreneur':10 }
df['Job'] = df['Job'].replace(Temp).astype(int)
del Temp
# Month
df['Month'] = df['Month'].replace(dict(zip([x for x in calendar.month_abbr][1:], np.arange(12)))).astype(int)
# Education
df['Education'] = df['Education'].replace(Education_Dict).astype(int)
# Pdays
df.loc[df['Pdays'] == 999, 'Pdays'] = -1
def LogRegPlot(Feat, df = df, Target = Target, C ='HotPink', EC = 'DeepPink' ):
    """Fit a single-feature logistic regression and plot the estimated probability.

    Feat   : name of the numeric feature column used as the sole predictor.
    df     : DataFrame holding the feature and the target (default: global df).
    Target : name of the binary 0/1 target column (default: global Target).
    C, EC  : face and edge colours of the scatter points.
    """
    X = df[Feat].values.reshape(-1,1)
    # Dense integer grid spanning the observed feature range, for a smooth curve
    Test = np.arange(df[Feat].min(), df[Feat].max()).reshape(-1,1)
    # sklearn expects a 1-D target vector (ravel avoids the DataConversionWarning
    # that the previous column-vector reshape triggered)
    y = df[Target].values.ravel()
    logr = LogisticRegression(solver='newton-cg')
    _ = logr.fit(X, y)
    Pred_Prop = logr.predict_proba(Test)
    fig, ax = plt.subplots(1, 1, figsize=(16,5))
    # Observed 0/1 outcomes and the fitted probability curve
    _ = ax.scatter(X, y, color=C, edgecolor = EC)
    _ = ax.plot(Test, Pred_Prop[:,1], color='MidnightBlue', lw = 1)
    Temp = ax.get_xlim()
    _ = ax.hlines(0, Temp[0], Temp[1], linestyles='dashed', lw=1)
    _ = ax.hlines(1, Temp[0], Temp[1], linestyles='dashed', lw=1)
    _ = ax.set_xlim(Temp)
    # BUG FIX: label the x-axis with the plotted feature instead of the hard-coded
    # 'Last Contact Duration (in seconds)', which was wrong for e.g. the 'Pdays' call
    _ = ax.set_xlabel(Feat)
    _ = ax.set_ylabel('Probability of %s' % Target)
    _ = ax.set_title('Estimated Probability of %s using Logistic Regression' % Target, weight='medium', fontsize = 14 )
# Probability curves for call duration and for days-since-last-contact
LogRegPlot('Duration')
LogRegPlot('Pdays', C='Lime', EC = 'LimeGreen')
def FeatBins(Inp, Bins):
    """Bin a numeric Series and return integer bin codes in numeric bin order.

    Bins is a list of edges; consecutive pairs form the half-open intervals
    (Bins[i], Bins[i+1]].  The returned Series holds 0 for the first
    interval, 1 for the second, and so on; values falling outside every
    interval get -1.

    BUG FIX: the previous implementation numbered the bins by sorting their
    *string* labels, so multi-digit edges were ordered lexicographically
    (e.g. '(10, 20]' before '(5, 10]') and the ordinal codes no longer
    followed the numeric bin order (this affected e.g. the Campaign bins).
    pd.cut's categories are already ordered by interval, so their
    categorical codes give exactly the intended encoding.
    """
    intervals = pd.IntervalIndex.from_tuples([(x, y) for x, y in zip(Bins[:-1], Bins[1:])])
    Out = pd.cut(Inp, bins = intervals)
    # cat.codes: 0..len(intervals)-1 in interval order, -1 for out-of-range/NaN
    return Out.cat.codes.astype(int)
# Discretise the remaining numeric features into ordinal bins
# (edges chosen by hand from each feature's observed range)
# Age
df['Age'] = FeatBins(df['Age'], [14, 24, 40, 59, 80, 100])
# Campaign (number of contacts this campaign)
df['Campaign'] = FeatBins(df['Campaign'], [0, 5, 10, 20, 50, 100])
# Consumer Confidence Index
df['Consumer Confidence Index'] = FeatBins(df['Consumer Confidence Index'], [-60, -40, -30, -20])
# Consumer Price Index
df['Consumer Price Index'] = FeatBins(df['Consumer Price Index'], [92, 93, 94, 95])
# Duration (last contact duration in seconds)
df['Duration'] = FeatBins(df['Duration'], [-1, 100, 200, 400, 800, 1000, 5000])
# Employment Variation Rate
df['Employment Variation Rate'] = FeatBins(df['Employment Variation Rate'], [-4, -3, -2, -1, 0, 1, 2])
# Euribor three Month Rate
df['Euribor three Month Rate'] = FeatBins(df['Euribor three Month Rate'], [0, 1, 2, 3, 4, 5, 6])
# Number of Employees
df['Number of Employees'] = FeatBins(df['Number of Employees'], [4960, 5000, 5030, 5100, 5200, 5300])
# Pdays
## Negative one here means unknown (recoded from 999 earlier); the (-2, 0] bin captures it
df['Pdays'] = FeatBins(df['Pdays'], [-2, 0, 5, 10, 20, 30])
Let's take a look at the variance of the features.
# Heatmap of every feature's variance (before scaling), sorted descending
Fig, ax = plt.subplots(figsize=(17,16))
Temp = df.drop(columns = [Target]).var().sort_values(ascending = False).to_frame(name= 'Variance').round(2).T
_ = sns.heatmap(Temp, ax=ax, annot=True, square=True, cmap =sns.color_palette("OrRd", 20),
                linewidths = 0.8, vmin=0, vmax=Temp.max(axis =1)[0],
                cbar_kws={'label': 'Feature Variance', "aspect":40, "shrink": .4, "orientation": "horizontal"})
# Wrap the long tick labels onto multiple lines for readability
labels = [x.replace(' ','\n').replace('Euribor\nthree','Euribor three').replace('\nof\n',' of\n')
          for x in [item.get_text() for item in ax.get_xticklabels()]]
_ = ax.set_xticklabels(labels)
_ = ax.set_yticklabels('')
Furthermore, we would like to standardize features by removing the mean and scaling to unit variance. In this article, we demonstrated the benefits of scaling data using StandardScaler().
# Columns to scale: every feature except the target
Temp = df.drop(columns = Target).columns.tolist()
# Scaling: zero mean, unit variance per feature
scaler = StandardScaler()
_ = scaler.fit(df[Temp])
df[Temp] = scaler.transform(df[Temp])
# Variance Plot (after standardisation every variance should be ~1)
Fig, ax = plt.subplots(figsize=(17,16))
Temp = df.drop(columns = [Target]).var().sort_values(ascending = False).to_frame(name= 'Variance').round(2).T
_ = sns.heatmap(Temp, ax=ax, annot=True, square=True, cmap =sns.color_palette('Greens'),
                linewidths = 0.8, vmin=0, vmax=Temp.max(axis =1)[0],
                cbar_kws={'label': 'Feature Variance', "aspect":40, "shrink": .4, "orientation": "horizontal"})
# Wrap the long tick labels onto multiple lines for readability
labels = [x.replace(' ','\n').replace('Euribor\nthree','Euribor three').replace('\nof\n',' of\n')
          for x in [item.get_text() for item in ax.get_xticklabels()]]
_ = ax.set_xticklabels(labels)
_ = ax.set_yticklabels('')
# Correlation of every feature with the target, sorted ascending, as a one-row heatmap
Fig, ax = plt.subplots(figsize=(17,16))
Temp = df.corr().round(2)
Temp = Temp.loc[(Temp.index == Target)].drop(columns = Target).T.sort_values(by = Target).T
_ = sns.heatmap(Temp, ax=ax, annot=True, square=True, cmap =sns.color_palette("Greens", n_colors=10),
                linewidths = 0.8, vmin=0, vmax=1,
                annot_kws={"size": 12},
                cbar_kws={'label': Target + ' Correlation', "aspect":40, "shrink": .4, "orientation": "horizontal"})
# Wrap the long tick labels onto multiple lines for readability
labels = [x.replace(' ','\n').replace('Euribor\nthree','Euribor three').replace('\nof\n',' of\n')
          for x in [item.get_text() for item in ax.get_xticklabels()]]
_ = ax.set_xticklabels(labels)
_ = ax.set_yticklabels('')
# Persist the standardized dataset next to the source CSV
df.to_csv (Path.split(".")[0]+'_STD.csv', index = None, header=True)
S. Moro, P. Cortez and P. Rita. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014
S. Moro, R. Laureano and P. Cortez. Using Data Mining for Bank Direct Marketing: An Application of the CRISP-DM Methodology. In P. Novais et al. (Eds.), Proceedings of the European Simulation and Modelling Conference - ESM'2011, pp. 117-121, Guimaraes, Portugal, October, 2011. EUROSIS. [bank.zip]